vm-event: proper vCPU-paused checks at resume
Author: Corneliu ZUZU <czuzu@bitdefender.com>
Mon, 4 Jul 2016 10:11:44 +0000 (12:11 +0200)
Committer: Jan Beulich <jbeulich@suse.com>
Mon, 4 Jul 2016 10:11:44 +0000 (12:11 +0200)
A VM_EVENT_FLAG_VCPU_PAUSED flag in a vm-event response should only be treated
as informative that the toolstack user wants the vm-event subsystem to unpause
the target vCPU, but not be relied upon to decide if the target vCPU is actually
paused.

That being said, this patch does the following:

* Fixes (replaces) the old behavior in vm_event_resume, which relied on
  VM_EVENT_FLAG_VCPU_PAUSED to determine if the target vCPU is paused, by
  actually checking the vCPU vm-event pause-count.

* ASSERTs that the vCPU is paused in vm_event_set_registers and
  vm_event_toggle_singlestep.

* Ignores VM_EVENT_FLAG_DENY in vm_event_register_write_resume if the target
  vCPU is not paused. Also adjusts the comment in public/vm_event.h to reflect
  that.

Signed-off-by: Corneliu ZUZU <czuzu@bitdefender.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Acked-by: Tamas K Lengyel <tamas@tklengyel.com>
xen/arch/x86/vm_event.c
xen/common/vm_event.c
xen/include/public/vm_event.h

index a9d38610d5691a99befe971f4f15b2142debc007..80f84d6ee7b0eaca790ea0789d09c55005928934 100644 (file)
@@ -61,9 +61,11 @@ void vm_event_cleanup_domain(struct domain *d)
 
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
 {
-    if ( !is_hvm_domain(d) || !atomic_read(&v->vm_event_pause_count) )
+    if ( !is_hvm_domain(d) )
         return;
 
+    ASSERT(atomic_read(&v->vm_event_pause_count));
+
     hvm_toggle_singlestep(v);
 }
 
@@ -75,6 +77,10 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
 
         ASSERT(w);
 
+        /* deny flag requires the vCPU to be paused */
+        if ( !atomic_read(&v->vm_event_pause_count) )
+            return;
+
         switch ( rsp->reason )
         {
         case VM_EVENT_REASON_MOV_TO_MSR:
@@ -100,6 +106,8 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
 
 void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
 {
+    ASSERT(atomic_read(&v->vm_event_pause_count));
+
     v->arch.user_regs.eax = rsp->data.regs.x86.rax;
     v->arch.user_regs.ebx = rsp->data.regs.x86.rbx;
     v->arch.user_regs.ecx = rsp->data.regs.x86.rcx;
index b303180ef44626ef69c87659cf6119c3b00f69aa..17d27166b621cdf87adbf32d904164e08c912011 100644 (file)
@@ -417,7 +417,8 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
         if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
             p2m_altp2m_check(v, rsp.altp2m_idx);
 
-        if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
+        /* Check flags which apply only when the vCPU is paused */
+        if ( atomic_read(&v->vm_event_pause_count) )
         {
             if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
                 vm_event_set_registers(v, &rsp);
@@ -425,7 +426,8 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
             if ( rsp.flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
                 vm_event_toggle_singlestep(d, v);
 
-            vm_event_vcpu_unpause(v);
+            if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
+                vm_event_vcpu_unpause(v);
         }
     }
 }
index 68bddfb0e2e8076fd6e129c0135dcc7af566f764..7bfe6cc4f39d4bbe3fcdb97d2553dcd59545295e 100644 (file)
@@ -77,6 +77,7 @@
  /*
   * Deny completion of the operation that triggered the event.
   * Currently only useful for MSR, CR0, CR3 and CR4 write events.
+  * Requires the vCPU to be paused already (synchronous events only).
   */
 #define VM_EVENT_FLAG_DENY               (1 << 6)
 /*